1use crate::{os, util, Protection, QueryIter, Region, Result};
23/// Changes the memory protection of one or more pages.
4///
5/// The address range may overlap one or more pages, and if so, all pages
6/// spanning the range will be modified. The previous protection flags are not
7/// preserved (if you desire to preserve the protection flags, use
8/// [`protect_with_handle`]).
9///
10/// # Parameters
11///
12/// - The range is `[address, address + size)`
13/// - The address is rounded down to the closest page boundary.
14/// - The size may not be zero.
15/// - The size is rounded up to the closest page boundary, relative to the
16/// address.
17///
18/// # Errors
19///
20/// - If an interaction with the underlying operating system fails, an error
21/// will be returned.
22/// - If size is zero,
23/// [`Error::InvalidParameter`](crate::Error::InvalidParameter) will be
24/// returned.
25///
26/// # Safety
27///
28/// This function can violate memory safety in a myriad of ways. Read-only memory
29/// can become writable, the executable properties of code segments can be
30/// removed, etc.
31///
32/// # Examples
33///
34/// - Make an array of x86 assembly instructions executable.
35///
36/// ```
37/// # fn main() -> region::Result<()> {
38/// # if cfg!(any(target_arch = "x86", target_arch = "x86_64"))
39/// # && !cfg!(any(target_os = "openbsd", target_os = "netbsd")) {
40/// use region::Protection;
41/// let ret5 = [0xB8, 0x05, 0x00, 0x00, 0x00, 0xC3u8];
42///
43/// let x: extern "C" fn() -> i32 = unsafe {
44/// region::protect(ret5.as_ptr(), ret5.len(), region::Protection::READ_WRITE_EXECUTE)?;
45/// std::mem::transmute(ret5.as_ptr())
46/// };
47///
48/// assert_eq!(x(), 5);
49/// # }
50/// # Ok(())
51/// # }
52/// ```
53#[inline]
54pub unsafe fn protect<T>(address: *const T, size: usize, protection: Protection) -> Result<()> {
55let (address, size) = util::round_to_page_boundaries(address, size)?;
56 os::protect(address.cast(), size, protection)
57}
/// Temporarily changes the memory protection of one or more pages.
///
/// The address range may overlap one or more pages, and if so, all pages within
/// the range will be modified. The protection flag for each page will be reset
/// once the handle is dropped. To conditionally prevent a reset, use
/// [`std::mem::forget`].
///
/// This function uses [`query_range`](crate::query_range) internally and is
/// therefore less performant than [`protect`]. Use this function only if you
/// need to reapply the memory protection flags of one or more regions after
/// operations.
///
/// # Guard
///
/// Remember not to conflate the *black hole* syntax with the ignored, but
/// unused, variable syntax. Otherwise the [`ProtectGuard`] instantly resets the
/// protection flags of all pages.
///
/// ```ignore
/// let _ = protect_with_handle(...); // Pages are instantly reset
/// let _guard = protect_with_handle(...); // Pages are reset once `_guard` is dropped.
/// ```
///
/// # Parameters
///
/// - The range is `[address, address + size)`
/// - The address is rounded down to the closest page boundary.
/// - The size may not be zero.
/// - The size is rounded up to the closest page boundary, relative to the
///   address.
///
/// # Errors
///
/// - If an interaction with the underlying operating system fails, an error
///   will be returned.
/// - If size is zero,
///   [`Error::InvalidParameter`](crate::Error::InvalidParameter) will be
///   returned.
///
/// # Safety
///
/// See [`protect`].
#[allow(clippy::missing_inline_in_public_items)]
pub unsafe fn protect_with_handle<T>(
  address: *const T,
  size: usize,
  protection: Protection,
) -> Result<ProtectGuard> {
  let (address, size) = util::round_to_page_boundaries(address, size)?;

  // Preserve the current regions' flags (queried *before* altering them, so
  // the guard can restore the original protection on drop)
  let mut regions = QueryIter::new(address, size)?.collect::<Result<Vec<_>>>()?;

  // Apply the desired protection flags
  protect(address, size, protection)?;

  // The queried regions may extend beyond `[address, address + size)`; clamp
  // the first and last region so the guard only restores the affected span.
  if let Some(region) = regions.first_mut() {
    // Offset the lower region to the smallest page boundary (`address` is
    // already page-aligned, so it can never lie below the region's start)
    region.base = address.cast();
    region.size -= address as usize - region.as_range().start;
  }

  if let Some(region) = regions.last_mut() {
    // Truncate the upper region to the smallest page boundary
    let protect_end = address as usize + size;
    region.size -= region.as_range().end - protect_end;
  }

  Ok(ProtectGuard::new(regions))
}
/// A RAII implementation of a scoped protection guard.
///
/// When this structure is dropped (falls out of scope), the memory regions'
/// protection will be reset.
#[must_use]
pub struct ProtectGuard {
  // The regions — carrying their original protection flags — restored on drop.
  regions: Vec<Region>,
}
impl ProtectGuard {
  /// Wraps the regions (with their pre-alteration flags) to restore on drop.
  #[inline(always)]
  fn new(regions: Vec<Region>) -> Self {
    Self { regions }
  }
}
145146impl Drop for ProtectGuard {
147#[inline]
148fn drop(&mut self) {
149let result = self
150.regions
151 .iter()
152 .try_for_each(|region| unsafe { protect(region.base, region.size, region.protection) });
153debug_assert!(result.is_ok(), "restoring region protection: {:?}", result);
154 }
155}
// SAFETY: the guard only owns a `Vec<Region>` of plain address/size/flag data
// and performs its work in `drop`; NOTE(review): assumes `Region` itself holds
// no thread-affine state — confirm against its definition.
unsafe impl Send for ProtectGuard {}
unsafe impl Sync for ProtectGuard {}
#[cfg(test)]
mod tests {
  use super::*;
  use crate::tests::util::alloc_pages;
  use crate::{page, query, query_range};

  // A zero-sized range must be rejected with an error, even for a null pointer.
  #[test]
  fn protect_null_fails() {
    assert!(unsafe { protect(std::ptr::null::<()>(), 0, Protection::NONE) }.is_err());
  }

  // Code segments can be made writable (skipped on platforms enforcing W^X).
  #[test]
  #[cfg(not(any(
    target_os = "openbsd",
    target_os = "netbsd",
    all(target_vendor = "apple", target_arch = "aarch64")
  )))]
  fn protect_can_alter_text_segments() {
    #[allow(clippy::ptr_as_ptr)]
    let address = &mut protect_can_alter_text_segments as *mut _ as *mut u8;
    unsafe {
      protect(address, 1, Protection::READ_WRITE_EXECUTE).unwrap();
      // Overwrite the first byte of this function's code (0x90 = x86 NOP).
      *address = 0x90;
    }
  }

  // A 2-byte range straddling a page boundary must alter both adjacent pages.
  #[test]
  fn protect_updates_both_pages_for_straddling_range() -> Result<()> {
    let pz = page::size();

    // Create a page boundary with different protection flags in the upper and
    // lower span, so the intermediate region sizes are fixed to one page.
    let map = alloc_pages(&[
      Protection::READ,
      Protection::READ_EXECUTE,
      Protection::READ_WRITE,
      Protection::READ,
    ]);

    let exec_page = unsafe { map.as_ptr().add(pz) };
    let exec_page_end = unsafe { exec_page.add(pz - 1) };

    // Change the protection over two page boundaries
    unsafe {
      protect(exec_page_end, 2, Protection::NONE)?;
    }

    // Query the two inner pages
    let result = query_range(exec_page, pz * 2)?.collect::<Result<Vec<_>>>()?;

    // On some OSs the pages are merged into one region
    assert!(matches!(result.len(), 1 | 2));
    assert_eq!(result.iter().map(Region::len).sum::<usize>(), pz * 2);
    assert_eq!(result[0].protection(), Protection::NONE);
    Ok(())
  }

  // The range's lower bound is inclusive and its upper bound is exclusive:
  // touching the last byte of a page affects that page, and extending one byte
  // past a page's end pulls in the following page.
  #[test]
  fn protect_has_inclusive_lower_and_exclusive_upper_bound() -> Result<()> {
    let map = alloc_pages(&[
      Protection::READ_WRITE,
      Protection::READ,
      Protection::READ_WRITE,
      Protection::READ,
    ]);

    // Alter the protection of the second page
    let second_page = unsafe { map.as_ptr().add(page::size()) };
    unsafe {
      let second_page_end = second_page.offset(page::size() as isize - 1);
      protect(second_page_end, 1, Protection::NONE)?;
    }

    let regions = query_range(map.as_ptr(), page::size() * 3)?.collect::<Result<Vec<_>>>()?;
    assert_eq!(regions.len(), 3);
    assert_eq!(regions[0].protection(), Protection::READ_WRITE);
    assert_eq!(regions[1].protection(), Protection::NONE);
    assert_eq!(regions[2].protection(), Protection::READ_WRITE);

    // Alter the protection of '2nd_page_start .. 2nd_page_end + 1'
    unsafe {
      protect(second_page, page::size() + 1, Protection::READ_EXECUTE)?;
    }

    let regions = query_range(map.as_ptr(), page::size() * 3)?.collect::<Result<Vec<_>>>()?;
    assert!(regions.len() >= 2);
    assert_eq!(regions[0].protection(), Protection::READ_WRITE);
    assert_eq!(regions[1].protection(), Protection::READ_EXECUTE);
    assert!(regions[1].len() >= page::size());

    Ok(())
  }

  // Dropping the guard must restore the pre-existing protection flags.
  #[test]
  fn protect_with_handle_resets_protection() -> Result<()> {
    let map = alloc_pages(&[Protection::READ]);

    unsafe {
      let _handle = protect_with_handle(map.as_ptr(), page::size(), Protection::READ_WRITE)?;
      assert_eq!(query(map.as_ptr())?.protection(), Protection::READ_WRITE);
    };

    assert_eq!(query(map.as_ptr())?.protection(), Protection::READ);
    Ok(())
  }

  // The guard must restore only the pages inside the requested range, leaving
  // the surrounding pages' flags untouched.
  #[test]
  fn protect_with_handle_only_alters_protection_of_affected_pages() -> Result<()> {
    let pages = [
      Protection::READ_WRITE,
      Protection::READ,
      Protection::READ_WRITE,
      Protection::READ_EXECUTE,
      Protection::NONE,
    ];
    let map = alloc_pages(&pages);

    let second_page = unsafe { map.as_ptr().add(page::size()) };
    let region_size = page::size() * 3;

    unsafe {
      let _handle = protect_with_handle(second_page, region_size, Protection::NONE)?;
      let region = query(second_page)?;

      assert_eq!(region.protection(), Protection::NONE);
      assert_eq!(region.as_ptr(), second_page);
    }

    // After the guard is dropped, every page reports its original protection.
    let regions =
      query_range(map.as_ptr(), page::size() * pages.len())?.collect::<Result<Vec<_>>>()?;

    assert_eq!(regions.len(), 5);
    assert_eq!(regions[0].as_ptr(), map.as_ptr());
    for i in 0..pages.len() {
      assert_eq!(regions[i].protection(), pages[i]);
    }

    Ok(())
  }
}